# NOTE(review): this is the interior of a larger sample-iteration loop — the
# enclosing for/while, and the `if` that matches the `else:` near the bottom,
# are outside this view. Indentation appears stripped in this chunk; confirm
# the real nesting against the original file before making logic changes.
adj_seq_cnt += 1  # one more adjacent sequence collected for the current keyframe group
print("adj_seq",adj_seq_cnt)
# Once a full group of adjacent sequences is gathered, reconcile the instance
# tokens across sequences, emit BEV data for each sequence, then advance to
# the next keyframe.
if adj_seq_cnt == num_adj_seqs:
# First, get the length of instance_token_list
# First, we need to reorganize the instance tokens (ids)
num_instance_token_list = len(save_instance_token_list)
if num_instance_token_list > 1:
# Compare two samples' instance_token_lists and keep the tokens they share
common_tokens = set(save_instance_token_list[0]).intersection(save_instance_token_list[1])
# If there are more than 2 samples, keep intersecting onward,
# e.g. (1 ∩ 2), then the result ∩ 3, and so on
for l in range(2, num_instance_token_list):
common_tokens = common_tokens.intersection(save_instance_token_list[l])
# Renumber instances per sequence: shared instances first, then each
# sequence's exclusive ones. `common_tokens` is the same set object on
# every pass, so its iteration order — and hence the numeric index each
# shared instance gets — is identical across sequences.
for l in range(num_instance_token_list):
exclusive_tokens = set(save_instance_token_list[l]).difference(common_tokens)
# we store the common instances first, then store the remaining instances
curr_save_data_dict = save_data_dict_list[l]
curr_save_box_dict = save_box_dict_list[l]
counter = 0
# Copy token-keyed box/category entries into index-keyed entries.
for token in common_tokens:
box_info = curr_save_box_dict['instance_boxes_' + token]
box_cat = curr_save_box_dict['category_' + token]
curr_save_data_dict['instance_boxes_' + str(counter)] = box_info
curr_save_data_dict['category_' + str(counter)] = box_cat
counter += 1
for token in exclusive_tokens:
box_info = curr_save_box_dict['instance_boxes_' + token]
box_cat = curr_save_box_dict['category_' + token]
curr_save_data_dict['instance_boxes_' + str(counter)] = box_info
curr_save_data_dict['category_' + str(counter)] = box_cat
counter += 1
# Sanity check: every recorded instance must have been renumbered exactly once.
# NOTE(review): `assert` is stripped under `python -O`; consider raising instead.
assert counter == curr_save_data_dict['num_instances'], "The number of instances is inconsistent."
save_data_dict_list[l] = curr_save_data_dict
else:
# Only one sequence in the group: no intersection needed — copy boxes over
# using the original token enumeration order as the index.
curr_save_box_dict = save_box_dict_list[0]
print(len(save_box_dict_list))
print(curr_save_box_dict.keys())
print("FINISHED")
curr_save_data_dict = save_data_dict_list[0]
for index, token in enumerate(save_instance_token_list[0]):
box_info = curr_save_box_dict['instance_boxes_' + token]
box_cat = curr_save_box_dict['category_' + token]
curr_save_data_dict['instance_boxes_' + str(index)] = box_info
curr_save_data_dict['category_' + str(index)] = box_cat
save_data_dict_list[0] = curr_save_data_dict
# ------------------------ Now we generate dense BEV maps ------------------------
# Convert each sequence's data dict to a dense BEV map, then sparsify it.
# NOTE(review): saving to disk is currently commented out, so the sparse data
# is computed but discarded — confirm whether that is intentional.
for seq_idx, seq_data_dict in enumerate(save_data_dict_list):
dense_bev_data = convert_to_dense_bev(seq_data_dict)
sparse_bev_data = convert_to_sparse_bev(dense_bev_data)
# save the data
#save_directory = check_folder(os.path.join(args.savepath, str(scene_idx) + '_' + str(save_seq_cnt)))
#save_file_name = os.path.join(save_directory, str(seq_idx) + '.npy')
#np.save(save_file_name, arr=sparse_bev_data)
print(" >> Finish sample: {}, sequence {}".format(save_seq_cnt, seq_idx))
# --------------------------------------------------------------------------------
print("len", len(save_box_dict_list))
# Reset the per-group accumulators before moving on to the next keyframe group.
save_seq_cnt += 1
adj_seq_cnt = 0
save_data_dict_list = list()
save_box_dict_list = list()
save_instance_token_list = list()
# Skip some keyframes if necessary
flag = False
# Advance `num_keyframe_skipped + 1` keyframes via the nuScenes 'next' links;
# `flag` marks that we ran off the end of the scene.
for _ in range(num_keyframe_skipped + 1):
if curr_sample['next'] != '':
curr_sample = nusc.get('sample', curr_sample['next'])
else:
flag = True  # no further keyframes in this scene
break
if flag: # No more keyframes, stop the gen_process
break
else:
# Landed on a valid keyframe: re-anchor the raw LIDAR sample_data stream to it.
curr_sample_data = nusc.get('sample_data', curr_sample['data']['LIDAR_TOP'])
else:
# Group not complete yet: advance the raw LIDAR sample_data stream,
# skipping `skip_frame` intermediate frames between the ones we keep.
flag = False
print("HI")
for _ in range(skip_frame + 1):
print("Hello")
if curr_sample_data['next'] != '':
curr_sample_data = nusc.get('sample_data', curr_sample_data['next']) #skip the frame
else:
flag = True  # no further sweep frames available
break
if flag: # No more sample frames , stop the gen_process
break